Note. Boxplots display the interquartile range (IQR, center box), and the whiskers extend 1.5*IQR from the lower and upper hinge. The white point indicates the mean and the white center line indicates the median.
In a first step we import the raw Qualtrics data, which was downloaded as an SPSS file.
# Reset working directory to the folder the current file is saved in
# (RStudio-only helper; intentionally disabled so the script is IDE-agnostic
# and runs from the project root)
#setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Import the raw Qualtrics survey export (SPSS .sav; variables arrive labelled)
dt0Raw <- read_spss("data/raw data/Snowball+Corona+Long-Term+Coverage+-+Baseline_March+30,+2020_00.32.sav")
The raw data set includes 588 variables for 14401 cases.
Inspecting missing data in the items.
# Table: missing data per item (interactive DT widget with export buttons)
dt0Raw %>%
select(-starts_with("t_"), -starts_with("Pol")) %>% # drop timers and political orientation (language-specific columns create artificial missingness)
select_if(~sum(is.na(.)) > 0) %>% # keep only variables with at least one NA
naniar::miss_var_summary(.) %>% # per-variable count and proportion of missingness
DT::datatable(.,
colnames = c("Variable", "Number Missing", "Percentage Missing"),
filter = 'top',
extensions = 'Buttons',
options = list(
columnDefs = list(list(className = 'dt-center')),
#autoWidth = TRUE,
dom = 'Bfrtlip', # B=buttons, f=filter, r=processing, t=table, l=length, i=info, p=pagination
buttons = c('copy', 'csv', 'excel', 'pdf', 'print'))) %>%
DT::formatRound('pct_miss', digits = 2) # round the percentage column to 2 decimals
# Plot: missing data per item
dt0Raw %>%
select(-starts_with("t_"), -starts_with("Pol")) %>% # drop timers and political orientation (language-specific columns create artificial missingness)
select_if(~sum(is.na(.)) > 0) %>% # keep only variables with at least one NA
naniar::gg_miss_var(.) # visualize per-variable missingness proportion
# Plot: cumulative missing data over the course of the survey
dt0Raw %>%
select(-starts_with("t_"), -starts_with("Pol")) %>% # drop timers and political orientation (language-specific columns create artificial missingness)
select_if(~sum(is.na(.)) > 0) %>% # keep only variables with at least one NA
naniar::gg_miss_var_cumsum(.) # missingness development over the survey flow
# Co-occurrences of missingness — intentionally disabled: too many variables
# for a readable upset plot
#dt0Raw %>%
# select(-starts_with("t_"), -starts_with("Pol")) %>% #drop timers and Political orientation (because of translation missingness)
# select_if(~sum(is.na(.)) > 0) %>% # remove all variables that have no missingess
# naniar::gg_miss_upset(., nsets = n_var_miss(.)) # visualize missingess co-occurences
Filter the Preview responses.
# Flag Qualtrics preview responses (Status != 0) without dropping them yet.
dt1Preview <- dt0Raw %>%
  mutate(
    FilterPreview = labelled(
      as.numeric(Status != 0), # 0 = regular response, 1 = survey preview
      labels = c(preview = 1),
      label = "Filter: survey preview response"
    )
  )
Filter survey responses that were shorter than 10 minutes.
# Truncate the duration distribution for plotting: keep everything below
# Median + 3.5 * MAD of the raw duration (in seconds) so the histogram is
# readable, then convert to minutes.
tOutlierHigh <- dt1Preview %>%
  select(Duration__in_seconds_) %>%
  filter(Duration__in_seconds_ <= stats::median(Duration__in_seconds_) + stats::mad(Duration__in_seconds_) * 3.5) %>%
  mutate(Minutes = Duration__in_seconds_ / 60)
# set time cut-off criterion:
tCutOff <- 10 # cut-off criterion in minutes
tCutOffPerc <- round(sum(tOutlierHigh$Minutes < tCutOff) / nrow(dt1Preview) * 100, 2) # percent of data lost with current cut-off
# FIX: use tCutOff rather than the hard-coded literal 10, so the flag always
# tracks the criterion defined above
tOutlierHigh$out <- tOutlierHigh$Minutes < tCutOff
# Histogram of survey duration; bars below the cut-off are highlighted.
ggplot(data = tOutlierHigh, aes(x = Minutes, fill = out)) +
  geom_histogram(bins = round(max(tOutlierHigh$Minutes), 0), # one bin per minute
                 alpha = .6) +
  geom_vline(xintercept = tCutOff,
             color = "darkred",
             linetype = "longdash") +
  # FIX: annotate() draws each label exactly once; the previous
  # geom_text(aes(...)) redrew the identical label for every data row,
  # causing heavy overplotting of the text
  annotate("text", x = tCutOff, y = Inf,
           label = paste0("time cut-off: ", tCutOff, " Minutes\n"),
           hjust = 1, colour = "darkred", angle = 90) +
  annotate("text", x = tCutOff, y = Inf,
           label = paste0("\ndata loss: ", tCutOffPerc, "%"),
           hjust = 1, colour = "darkred", angle = 90) +
  scale_x_continuous(breaks = seq(0, round(max(tOutlierHigh$Minutes), 0), 5)) +
  scale_fill_manual(values = c("darkgrey", "darkred")) +
  labs(title = "Truncated Histogram: Survey Duration",
       x = "Duration [Minutes]", # FIX: typo "Mintues"
       y = "Frequency Count",
       caption = "Notes:
(1) Truncated: all participants who took less time than Median+3.5*MAD
(2) Each bin represents one Minute") +
  theme_Publication() +
  theme(legend.position = "none")
rm(tOutlierHigh, tCutOff, tCutOffPerc)
# Flag anyone with less than 10 minutes (600 s) survey duration.
# FIX: use >= 600 so a duration of exactly 10 minutes is NOT flagged —
# this matches the variable label ("less than 10 minutes") and the strict
# `Minutes < tCutOff` criterion used for the data-loss figure above.
dt2Time <- dt1Preview %>%
  mutate(FilterTime = labelled(ifelse(Duration__in_seconds_ >= 600, 0, 1),
                               labels = c(`extremely quick` = 1),
                               label = "Filter: Took less than 10 minutes on survey"))
Filter participants who straightlined on the job insecurity scale, which includes a reverse-coded item. We only flag people who straightlined outside the median category, because an all-“neither agree nor disagree” pattern might be a meaningful response.
# Check the co-occurrence pattern of missingness across the three
# job-insecurity items; -99 is the questionnaire's missing-data code
naniar::gg_miss_upset(dt2Time %>%
select(ResponseId, jbInsec01, jbInsec02, jbInsec03) %>%
na_if(., -99) # all -99 into <NA>
)
# Isolate respondents who straightlined outside the median category
# (an all-"neither agree nor disagree" pattern might be a meaningful
# response, so rows with mean == 0 are deliberately kept)
jobinsecRed <- dt2Time %>%
select(ResponseId, jbInsec01, jbInsec02, jbInsec03) %>%
na_if(., -99) %>% # all -99 into <NA>
na.omit() %>% # remove people who have missing data on one of the three items
mutate(mean = rowMeans(select(., c("jbInsec01", "jbInsec02", "jbInsec03"))),
sd = matrixStats::rowSds(as.matrix(select(., c("jbInsec01", "jbInsec02", "jbInsec03"))))) %>% # calculate row-means and row-sds
filter(sd == 0, mean != 0) # sd == 0: identical answers on all items; mean != 0: not all at the scale midpoint
# Flag respondents identified above as job-insecurity straightliners.
dt3Straightliner <- dt2Time %>%
  mutate(
    FilterStraightliner = labelled(
      as.numeric(ResponseId %in% jobinsecRed$ResponseId), # 1 = straightliner
      labels = c(straightliner = 1),
      label = "Filter: straightliner on Job Insecurity"
    )
  )
rm(jobinsecRed)
Note: For each scale we run an item analysis and combine the items into a mean score (.m) and a factor score (.fa). We also provide centered (.c) and standardized (.z) versions of the mean scores. Most of these items are not yet labelled for SPSS.
Re-coding reverse coded items and the Qualtrics language codes.
# Recoded items: reverse-code jbInsec02 and bor03 (scale runs -2..2, sign is
# flipped, midpoint 0 stays 0).
# NOTE(review): dplyr::recode leaves unmatched numeric values unchanged, so
# the explicit `-99` mapping for jbInsec02 is redundant but harmless; any
# -99 in bor03 would likewise pass through untouched — confirm whether bor03
# can contain -99 and whether that is intended.
dt4newVars <- dt3Straightliner %>%
mutate(jbInsec02_R = labelled(recode(as.numeric(jbInsec02), `-2` = 2, `-1` = 1, `0` = 0, `1` = -1, `2` = -2, `-99` = -99),
labels = NULL, label="Job Insecurity 02 (re-coded)"),
bor03_R = labelled(recode(as.numeric(bor03), `-2` = 2, `-1` = 1, `0` = 0, `1` = -1, `2` = -2),
labels = NULL, label="Boredom 03 (re-coded)"))
# Language
# Import the Qualtrics language-code lookup table
qualtricsLanguage <- read_excel("data/raw data/qualtricsLanguageCodes.xlsx")
# Left-join the lookup onto the data by Qualtrics' Q_Language code.
# NOTE(review): base merge() re-sorts the rows by the join key (Q_Language);
# downstream code must not rely on the original row order.
dt4newVars <- merge(x=dt4newVars, y=qualtricsLanguage, by="Q_Language", all.x=TRUE)
rm(qualtricsLanguage)
We currently have 712 different free text country responses. Here we aim to consolidate them into one variable.
# Consolidate the free-text country entries into one coded_country variable.
# case_when() evaluates top-to-bottom, so earlier branches win; the patterns
# mix country names, translations, typos, and city/region names observed in
# the data.
# NOTE(review): several patterns contain unescaped regex dots ('U.S.',
# 'u.k.', 'k.s.a') — '.' matches ANY character, so e.g. 'U.S.' also matches
# "Russia" (via the "USSI" substring) and codes it as USA because the USA
# branch comes first. Consider fixed = TRUE or escaping ('U\\.S\\.').
dt4newVars <- dt4newVars %>%
mutate(coded_country = case_when(grepl('^usa$', country, ignore.case = T) | grepl('unites state', country, ignore.case = T) |
grepl('united state', country, ignore.case = T) | grepl('^america$', country, ignore.case = T) |
grepl('U.S.', country, ignore.case = T) | grepl('Estados Unidos', country, ignore.case = T) | # NOTE(review): unescaped dots in 'U.S.'
grepl('colorado', country, ignore.case = T) | grepl('^us$', country, ignore.case = T) |
grepl('xas', country, ignore.case = T) | grepl('sates', country, ignore.case = T) |
grepl('Amerika Serikat', country, ignore.case = T) | grepl('california', country, ignore.case = T) |
grepl('corlifornia', country, ignore.case = T) | grepl('états-unis', country, ignore.case = T) |
grepl('york', country, ignore.case = T) | grepl('yark', country, ignore.case = T) |
grepl('puerto rico', country, ignore.case = T) | grepl('^tx$', country, ignore.case = T) |
grepl('^tn$', country, ignore.case = T) | grepl('U S', country, ignore.case = T) ~ 'United States of America',
grepl('canad', country, ignore.case = T) | grepl('vancouver', country, ignore.case = T) ~ 'Canada',
grepl('mexico', country, ignore.case = T) | grepl('México', country, ignore.case = T) ~ 'Mexico',
grepl('spain', country, ignore.case = T) | grepl('esp', country, ignore.case = T) |
grepl('Spagna', country, ignore.case = T) | grepl('Spanien', country, ignore.case = T) |
grepl('Catal', country, ignore.case = T) | grepl('Euskal Herria', country, ignore.case = T) |
grepl('basque', country, ignore.case = T) | grepl('Eapaña', country, ignore.case = T) |
grepl('Esapaña', country, ignore.case = T) | grepl('madrid', country, ignore.case = T) |
grepl('Montalbán de Córdoba', country, ignore.case = T) | grepl('Pais vasco', country, ignore.case = T) |
grepl('Spanje', country, ignore.case = T) ~ 'Spain',
grepl('france', country, ignore.case = T) | grepl('Francia', country, ignore.case = T) |
grepl('Frankrijk', country, ignore.case = T) ~ 'France',
grepl('germany', country, ignore.case = T) | grepl('deutschland', country, ignore.case = T) |
grepl('Alemania', country, ignore.case = T) | grepl('germania', country, ignore.case = T) |
grepl('^Almanya$', country, ignore.case = T) | grepl('berlin', country, ignore.case = T) |
grepl('Duitsland', country, ignore.case = T) ~ 'Germany',
grepl('portugal', country, ignore.case = T) ~ 'Portugal',
grepl('weden', country, ignore.case = T) ~ 'Sweden',
grepl('netherland', country, ignore.case = T) | grepl('nederland', country, ignore.case = T) |
grepl('Niederlande', country, ignore.case = T) | grepl('Belanda', country, ignore.case = T) |
grepl('^NL$', country, ignore.case = T) | grepl('Olanda', country, ignore.case = T) |
grepl('Paesi Bassi', country, ignore.case = T) | grepl('bajos', country, ignore.case = T) |
grepl('Gelderland', country, ignore.case = T) | grepl('Hollanda', country, ignore.case = T) ~ 'Netherlands',
grepl('^indonesia$', country, ignore.case = T) | grepl('indonesian', country, ignore.case = T) |
grepl('kota Tarakan', country, ignore.case = T) | grepl('Imdonesia', country, ignore.case = T) |
grepl('Indònesia', country, ignore.case = T) | grepl('jakarta', country, ignore.case = T) ~ 'Indonesia',
grepl('ital', country, ignore.case = T) | grepl('Sardegna', country, ignore.case = T) |
grepl('Bisceglie', country, ignore.case = T) | grepl('Ladispoli', country, ignore.case = T) |
grepl('Castelforte', country, ignore.case = T) | grepl('milano', country, ignore.case = T) |
(grepl('roma', country, ignore.case = T) & !grepl('romania', country, ignore.case = T)) |
grepl('Dorgali', country, ignore.case = T) | grepl('bari', country, ignore.case = T) |
grepl('bologna', country, ignore.case = T) | grepl('Brescia', country, ignore.case = T) |
grepl('Cala gonone', country, ignore.case = T) | grepl('Chieti', country, ignore.case = T) |
grepl('Ferentino', country, ignore.case = T) | grepl('Frosinone', country, ignore.case = T) |
grepl('Gragnano lucca ', country, ignore.case = T) | grepl('Guidonia', country, ignore.case = T) |
grepl('Itaia', country, ignore.case = T) | grepl('İtalya', country, ignore.case = T) |
grepl('Mareno di Piave', country, ignore.case = T) | grepl('modena', country, ignore.case = T) |
grepl('Pellizzano', country, ignore.case = T) | grepl('Predazzo', country, ignore.case = T) |
grepl('Refrontolo', country, ignore.case = T) | grepl('Cosma e Damiano', country, ignore.case = T) |
grepl('Scalea', country, ignore.case = T) | grepl('Scauri', country, ignore.case = T) |
grepl('Segni', country, ignore.case = T) | grepl('SETTIMO VITTONE', country, ignore.case = T) |
grepl('Susegana', country, ignore.case = T) | grepl('Terralba', country, ignore.case = T) |
grepl('trento', country, ignore.case = T) | grepl('treviso', country, ignore.case = T) |
grepl('Tezze di Piave', country, ignore.case = T) | grepl('Valmontone', country, ignore.case = T) |
grepl('Vergato', country, ignore.case = T) | grepl('veneto', country, ignore.case = T) |
grepl('Gragnano lucca', country, ignore.case = T) ~ 'Italy',
grepl('hong kong', country, ignore.case = T) ~ 'Hong Kong S.A.R.',
grepl('phil', country, ignore.case = T) | grepl('Filipinas', country, ignore.case = T) ~ 'Philippines',
grepl('argentina', country, ignore.case = T) | grepl('arge', country, ignore.case = T) ~ 'Argentina',
grepl('pakistan', country, ignore.case = T) | grepl('Abbottabad', country, ignore.case = T) |
grepl('Peshawar', country, ignore.case = T) ~ 'Pakistan',
grepl('united kingdo', country, ignore.case = T) | grepl('^uk$', country, ignore.case = T) |
grepl('Reino Unido', country, ignore.case = T) | grepl('britain', country, ignore.case = T) |
grepl('Regno Unito', country, ignore.case = T) | grepl('u.k.', country, ignore.case = T) | # NOTE(review): unescaped dots in 'u.k.'
grepl('بريطانيا', country, ignore.case = T) | grepl('the uk', country, ignore.case = T) |
grepl('U K', country, ignore.case = T) | grepl('Verenigd Koninkrijk', country, ignore.case = T) |
grepl('Windsor', country, ignore.case = T) | grepl('scotland', country, ignore.case = T) |
grepl('england', country, ignore.case = T) | grepl('wales', country, ignore.case = T) |
grepl('İngiltere', country, ignore.case = T) | grepl('Northern Ireland', country, ignore.case = T) |
grepl('Egland', country, ignore.case = T) | grepl('^gb$', country, ignore.case = T) |
grepl('N Ireland', country, ignore.case = T) | grepl('Schotland', country, ignore.case = T) |
grepl('Scozia', country, ignore.case = T) ~ 'United Kingdom',
grepl('africa', country, ignore.case = T) | grepl('^SA$', country, ignore.case = T) |
grepl('Sudáfrica', country, ignore.case = T) | grepl('western cape', country, ignore.case = T) ~ 'South Africa',
grepl('^chile$', country, ignore.case = T) ~ 'Chile',
grepl('australia', country, ignore.case = T) | grepl('Austrija', country, ignore.case = T) ~ 'Australia', # NOTE(review): 'Austrija' is Croatian/Serbian for Austria — likely miscoded here
grepl('colombia', country, ignore.case = T) ~ 'Colombia',
grepl('turkey', country, ignore.case = T) | grepl('tür', country, ignore.case = T) ~ 'Turkey',
grepl('taiwan', country, ignore.case = T) ~ 'Taiwan',
grepl('^Venezuela$', country, ignore.case = T) ~ 'Venezuela',
grepl('israel', country, ignore.case = T) | grepl('اللد', country, ignore.case = T) |
grepl('اسرائيل', country, ignore.case = T) | grepl('كفر قاسم', country, ignore.case = T) |
grepl('Isreal', country, ignore.case = T) | grepl('רמלה', country, ignore.case = T) ~ 'Israel',
grepl('greece', country, ignore.case = T) | grepl('Grecia', country, ignore.case = T) ~ 'Greece',
grepl('austria', country, ignore.case = T) | grepl('sterreich', country, ignore.case = T) ~ 'Austria',
grepl('new zealand', country, ignore.case = T) | grepl('Neuseeland', country, ignore.case = T) ~ 'New Zealand',
grepl('Tuni', country, ignore.case = T) | grepl('تونس', country, ignore.case = T) ~ 'Tunisia',
grepl('Belg', country, ignore.case = T) | grepl('Bélgica', country, ignore.case = T) ~ 'Belgium',
grepl('China', country, ignore.case = T) ~ 'China',
grepl('cyp', country, ignore.case = T) ~ 'Cyprus',
grepl('Schweiz', country, ignore.case = T) | grepl('Suiza', country, ignore.case = T) |
grepl('Svizzera', country, ignore.case = T) | grepl('Zwitserland', country, ignore.case = T) |
grepl('switzerland', country, ignore.case = T) ~ 'Switzerland',
grepl('United Arab Emirates', country, ignore.case = T) | grepl('uae', country, ignore.case = T) ~ 'United Arab Emirates',
grepl('Croa', country, ignore.case = T) ~ 'Croatia',
grepl('india', country, ignore.case = T) ~ 'India',
grepl('algeri', country, ignore.case = T) | grepl('الجزائر', country, ignore.case = T) |
grepl('Algérie', country, ignore.case = T) ~ 'Algeria',
grepl('bulgaria', country, ignore.case = T) ~ 'Bulgaria',
grepl('Poland', country, ignore.case = T) | grepl('POLONIA', country, ignore.case = T) ~ 'Poland',
grepl('romania', country, ignore.case = T) ~ 'Romania',
grepl('singapore', country, ignore.case = T) ~ 'Singapore',
grepl('Srbija', country, ignore.case = T) | grepl('serbia', country, ignore.case = T) |
grepl('Србија', country, ignore.case = T) ~ 'Republic of Serbia',
grepl('czech', country, ignore.case = T) | grepl('checa', country, ignore.case = T) ~ 'Czech Republic',
grepl('lux', country, ignore.case = T) ~ 'Luxembourg',
grepl('slova', country, ignore.case = T) ~ 'Slovakia',
grepl('brazil', country, ignore.case = T) | grepl('brasil', country, ignore.case = T)~ 'Brazil',
grepl('^ireland$', country, ignore.case = T) | grepl('Irlanda', country, ignore.case = T) ~ 'Ireland',
grepl('japan', country, ignore.case = T) | grepl('Giappone', country, ignore.case = T) |
grepl('Japonya', country, ignore.case = T) ~ 'Japan',
grepl('Malay', country, ignore.case = T) ~ 'Malaysia',
grepl('nigeria', country, ignore.case = T) ~ 'Nigeria',
grepl('Riyad', country, ignore.case = T) | grepl('^Saudi arabia$', country, ignore.case = T) |
grepl('Arabia Saudita', country, ignore.case = T) | grepl('^saudi$', country, ignore.case = T) |
grepl('Kingdom of Saudia arabia', country, ignore.case = T) | grepl('KSA', country, ignore.case = T) |
grepl('k.s.a', country, ignore.case = T) | grepl('Arabie saoudite', country, ignore.case = T) | # NOTE(review): unescaped dots in 'k.s.a'
grepl('الرياض', country, ignore.case = T) | grepl('السعودية', country, ignore.case = T) |
grepl('السعوديه', country, ignore.case = T) ~ 'Saudi Arabia',
grepl('^thailand$', country, ignore.case = T) ~ 'Thailand',
grepl('urug', country, ignore.case = T) ~ 'Uruguay',
grepl('costa', country, ignore.case = T) ~ 'Costa Rica',
grepl('ecuador', country, ignore.case = T) ~ 'Ecuador',
grepl('finland', country, ignore.case = T) ~ 'Finland',
grepl('guat', country, ignore.case = T) ~ 'Guatemala',
grepl('iceland', country, ignore.case = T) ~ 'Iceland',
grepl('iraq', country, ignore.case = T) | grepl('العراق', country, ignore.case = T) ~ 'Iraq',
grepl('iran', country, ignore.case = T) ~ 'Iran',
grepl('lebanon', country, ignore.case = T) | grepl('liban', country, ignore.case = T) ~ 'Lebanon',
grepl('norway', country, ignore.case = T) ~ 'Norway',
grepl('palestine', country, ignore.case = T) | grepl('فلسطين ', country, ignore.case = T) |
grepl('^فلسطين$', country, ignore.case = T) | grepl('الرملة', country, ignore.case = T) ~ 'Palestine',
grepl('peru', country, ignore.case = T) ~ 'Peru',
grepl('domin', country, ignore.case = T) ~ 'Dominican Republic',
grepl('albania', country, ignore.case = T) ~ 'Albania',
grepl('andorra', country, ignore.case = T) ~ 'Andorra',
grepl('bahrain', country, ignore.case = T) ~ 'Bahrain',
grepl('bangladesh', country, ignore.case = T) ~ 'Bangladesh',
grepl('botswana', country, ignore.case = T) ~ 'Botswana',
grepl('camer', country, ignore.case = T) ~ 'Cameroon',
grepl('المغرب', country, ignore.case = T) | grepl('Maroc', country, ignore.case = T) ~ 'Morocco',
grepl('jordan', country, ignore.case = T) ~ 'Jordan',
grepl('ليبيا', country, ignore.case = T) ~ 'Libya',
grepl('مصر', country, ignore.case = T) ~ 'Egypt',
grepl('mark', country, ignore.case = T) ~ 'Denmark',
grepl('salvador', country, ignore.case = T) ~ 'El Salvador',
grepl('estonia', country, ignore.case = T) ~ 'Estonia',
grepl('korea', country, ignore.case = T) | grepl('Güney Kore', country, ignore.case = T) ~ 'South Korea',
grepl('hungary', country, ignore.case = T) ~ 'Hungary',
grepl('maurice', country, ignore.case = T) ~ 'Mauritius',
grepl('jamaica', country, ignore.case = T) ~ 'Jamaica',
grepl('kenia', country, ignore.case = T) ~ 'Kenya',
grepl('laos', country, ignore.case = T) ~ 'Laos',
grepl('latvia', country, ignore.case = T) ~ 'Latvia',
grepl('malta', country, ignore.case = T) ~ 'Malta',
grepl('myanmar', country, ignore.case = T) ~ 'Myanmar',
grepl('nepal', country, ignore.case = T) ~ 'Nepal',
grepl('^oman$', country, ignore.case = T) ~ 'Oman',
grepl('qatar', country, ignore.case = T) ~ 'Qatar',
grepl('panam', country, ignore.case = T) ~ 'Panama',
grepl('tanzania', country, ignore.case = T) ~ 'United Republic of Tanzania',
grepl('vietnam', country, ignore.case = T) ~ 'Vietnam'))
# Tally the free-text country strings the coding above did not capture.
country_counts <- dt4newVars %>%
  filter(is.na(coded_country)) %>%
  count(country)
Action needed:
We currently have 712 different free-text country responses. The most recent coding still leaves 262 responses unconsolidated.
Political orientation was measured per language. We merge these variables here.
# clean-up country coding:
rm(country_counts)
# Political orientation was asked once per survey language; each language has
# its own set of "..._x" / "..._y" / "..._<quadrant>" columns and a respondent
# has values in at most one language's columns, so rowSums(..., na.rm = TRUE)
# collapses the per-language columns into single variables.
# FIX: `na.rm = TRUE` instead of the reassignable shorthand `T`.
# NOTE(review): rowSums(na.rm = TRUE) over an all-NA row yields 0, not NA —
# respondents who skipped the item end up at coordinate (0, 0) with an empty
# category string (which na_if below turns into NA for PolOrCat only).
dt4newVars <- dt4newVars %>%
  mutate(PolOrX = labelled(rowSums(select(., ends_with("_x")), na.rm = TRUE),
                           labels = NULL, label="Political Compass X-Coordinate"),
         PolOrY = labelled(rowSums(select(., ends_with("_y")), na.rm = TRUE),
                           labels = NULL, label="Political Compass Y-Coordinate"),
         PolOrAuthoritarianLeft = rowSums(select(., ends_with("_Authoritarian_Left")), na.rm = TRUE),
         PolOrAuthoritarianLeftLab = dplyr::recode(PolOrAuthoritarianLeft, `1` = "Authoritarian Left", `0` = ""),
         PolOrAuthoritarianRight = rowSums(select(., ends_with("_Authoritarian_right")), na.rm = TRUE),
         PolOrAuthoritarianRightLab = dplyr::recode(PolOrAuthoritarianRight, `1` = "Authoritarian Right", `0` = ""),
         PolOrLibertarianLeft = rowSums(select(., ends_with("_Libertarian_Left")), na.rm = TRUE),
         PolOrLibertarianLeftLab = dplyr::recode(PolOrLibertarianLeft, `1` = "Libertarian Left", `0` = ""),
         PolOrLibertarianRight = rowSums(select(., ends_with("_Libertarian_Right")), na.rm = TRUE),
         PolOrLibertarianRightLab = dplyr::recode(PolOrLibertarianRight, `1` = "Libertarian Right", `0` = ""),
         PolOrOther = rowSums(select(., ends_with("_Other")), na.rm = TRUE),
         PolOrOtherLab = dplyr::recode(PolOrOther, `1` = "Other", `0` = ""),
         # concatenate the mutually exclusive quadrant labels into one string
         PolOrCat = paste0(PolOrAuthoritarianLeftLab,
                           PolOrAuthoritarianRightLab,
                           PolOrLibertarianLeftLab,
                           PolOrLibertarianRightLab,
                           PolOrOtherLab),
         PolOrCat = as.factor(na_if(PolOrCat, ""))) %>%
  # drop the raw per-language Pol* columns; keep only the merged variables
  select(-starts_with("Pol"),
         PolOrX,
         PolOrY,
         PolOrCat)
attr(dt4newVars$PolOrCat,'label') <- 'Political Orientation Quadrant'
# High Arousal Negative Affect
## Anger was not measured in wave 1, so the scale uses only anxiety + nervousness
pairs.panels.new(dt4newVars %>% select(affAnx, affNerv))
cat("<br>")
# Mean score across the two items (psych::scoreItems, response range 1-5)
dt4newVars$affHighNeg.m <- scoreItems(keys=c(1,1), items = dt4newVars %>% select(affAnx, affNerv), min = 1, max = 5)$scores
# Scale descriptives; FIX: skew = FALSE / full_width = FALSE instead of the
# reassignable shorthands F
as.data.frame(psych::describe(dt4newVars$affHighNeg.m, skew = FALSE)) %>%
mutate(vars = "High Arousal Negative Affect") %>%
kable(., caption = "High Arousal Negative Affect: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| High Arousal Negative Affect | 14401 | 2.896 | 1.04 | 1 | 5 | 4 | 0.0087 |
# Centered (.c), standardized (.z), and factor-score (.fa) versions of the
# scale. FIX: TRUE/FALSE instead of the reassignable shorthands T/F.
dt4newVars$affHighNeg.c <- scale(dt4newVars$affHighNeg.m, scale = FALSE, center = TRUE) # mean-centered
dt4newVars$affHighNeg.z <- scale(dt4newVars$affHighNeg.m, scale = TRUE) # z-standardized
dt4newVars$affHighNeg.fa <- fa(dt4newVars %>% select(affAnx, affNerv))$scores # single-factor scores (psych::fa)
# Low Arousal Negative Affect
# Item analysis (Scale package): reliability and single-factor loadings
ia.affLowNeg <- dt4newVars %>%
dplyr::select(affBor, affExh, affDepr) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.affLowNeg$rely # print reliability summary
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.57 . Furthermore, deleting item(s) 1 may improve reliability.
# Emit the HTML note describing the factor analysis (knitr output glue)
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
# Item-analysis report: item-scale correlations, loadings, means, SDs.
# FIX: full_width = FALSE instead of the reassignable shorthand F.
as.data.frame(Scale::ReportTable(ia.affLowNeg)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| affDepr | 0.4732 | 0.8283 | 2.199 | 1.152 |
| affExh | 0.3840 | 0.5209 | 2.524 | 1.230 |
| affBor | 0.2866 | 0.3594 | 2.501 | 1.295 |
cat("<br>")
# Item descriptives (psych::describe keeps item names as rownames; copy them
# into a column). FIX: full_width = FALSE instead of F.
as.data.frame(psych::describe(dt4newVars %>% select(affBor, affExh, affDepr))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Low Arousal Negative Affect: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| affBor | 12344 | 2.501 | 1.295 | 2 | 2.401 | 1.483 | 1 | 5 | 4 | 0.3917 | -1.0254 | 0.0117 |
| affExh | 12327 | 2.524 | 1.230 | 2 | 2.457 | 1.483 | 1 | 5 | 4 | 0.3094 | -1.0094 | 0.0111 |
| affDepr | 12341 | 2.199 | 1.152 | 2 | 2.079 | 1.483 | 1 | 5 | 4 | 0.6651 | -0.5391 | 0.0104 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(affBor, affExh, affDepr)) # item correlation panel
cat("<br>")
# Scale mean score (all three items keyed positively, response range 1-5)
dt4newVars$affLowNeg.m <- scoreItems(keys=c(1,1,1), items = dt4newVars %>% select(affBor, affExh, affDepr), min = 1, max = 5)$scores
# Scale descriptives (skew/kurtosis suppressed).
# FIX: skew = FALSE / full_width = FALSE instead of F.
as.data.frame(psych::describe(dt4newVars$affLowNeg.m, skew = FALSE)) %>%
mutate(vars = "Low Arousal Negative Affect") %>%
kable(., caption = "Low Arousal Negative Affect: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Low Arousal Negative Affect | 14401 | 2.349 | 0.8433 | 1 | 5 | 4 | 0.007 |
# Centered (.c), standardized (.z), and factor-score (.fa) versions.
# FIX: TRUE/FALSE instead of the reassignable shorthands T/F.
dt4newVars$affLowNeg.c <- scale(dt4newVars$affLowNeg.m, scale = FALSE, center = TRUE) # mean-centered
dt4newVars$affLowNeg.z <- scale(dt4newVars$affLowNeg.m, scale = TRUE) # z-standardized
dt4newVars$affLowNeg.fa <- fa(dt4newVars %>% select(affBor, affExh, affDepr))$scores # single-factor scores
# Low Arousal Positive Affect
# Item analysis (Scale package): reliability and single-factor loadings
ia.affLowPos <- dt4newVars %>%
dplyr::select(affCalm, affContent, affRel) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.affLowPos$rely # print reliability summary
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.74 . Furthermore, deleting item(s) 2 may improve reliability.
# Emit the HTML note describing the factor analysis (knitr output glue)
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
# Item-analysis report: item-scale correlations, loadings, means, SDs.
# FIX: full_width = FALSE instead of the reassignable shorthand F.
as.data.frame(Scale::ReportTable(ia.affLowPos)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| affRel | 0.6268 | 0.8116 | 2.528 | 1.099 |
| affCalm | 0.6159 | 0.7853 | 2.792 | 1.069 |
| affContent | 0.4463 | 0.5058 | 2.574 | 1.099 |
cat("<br>")
# Item descriptives (rownames copied into a column).
# FIX: full_width = FALSE instead of F.
as.data.frame(psych::describe(dt4newVars %>% select(affCalm, affContent, affRel))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Low Arousal Positive Affect: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| affCalm | 12329 | 2.792 | 1.069 | 3 | 2.811 | 1.483 | 1 | 5 | 4 | 0.0041 | -0.7403 | 0.0096 |
| affContent | 12318 | 2.574 | 1.099 | 3 | 2.551 | 1.483 | 1 | 5 | 4 | 0.1428 | -0.8217 | 0.0099 |
| affRel | 12324 | 2.528 | 1.099 | 3 | 2.494 | 1.483 | 1 | 5 | 4 | 0.2130 | -0.7984 | 0.0099 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(affCalm, affContent, affRel)) # item correlation panel
cat("<br>")
# Scale mean score (all three items keyed positively, response range 1-5)
dt4newVars$affLowPos.m <- scoreItems(keys=c(1,1,1), items = dt4newVars %>% select(affCalm, affContent, affRel), min = 1, max = 5)$scores
# Scale descriptives (skew/kurtosis suppressed).
# FIX: skew = FALSE / full_width = FALSE instead of F.
as.data.frame(psych::describe(dt4newVars$affLowPos.m, skew = FALSE)) %>%
mutate(vars = "Low Arousal Positive Affect") %>%
kable(., caption = "Low Arousal Positive Affect: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Low Arousal Positive Affect | 14401 | 2.685 | 0.8263 | 1 | 5 | 4 | 0.0069 |
# Centered (.c), standardized (.z), and factor-score (.fa) versions.
# FIX: TRUE/FALSE instead of the reassignable shorthands T/F.
dt4newVars$affLowPos.c <- scale(dt4newVars$affLowPos.m, scale = FALSE, center = TRUE) # mean-centered
dt4newVars$affLowPos.z <- scale(dt4newVars$affLowPos.m, scale = TRUE) # z-standardized
dt4newVars$affLowPos.fa <- fa(dt4newVars %>% select(affCalm, affContent, affRel))$scores # single-factor scores
# High Arousal Positive Affect
# Item analysis (Scale package): reliability and single-factor loadings
ia.affHighPos <- dt4newVars %>%
dplyr::select(affEnerg, affExc, affInsp) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.affHighPos$rely # print reliability summary
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.67 .
# Emit the HTML note describing the factor analysis (knitr output glue)
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
# Item-analysis report: item-scale correlations, loadings, means, SDs.
# FIX: full_width = FALSE instead of the reassignable shorthand F.
as.data.frame(Scale::ReportTable(ia.affHighPos)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| affInsp | 0.5265 | 0.7258 | 2.308 | 1.142 |
| affEnerg | 0.4898 | 0.6415 | 2.378 | 1.077 |
| affExc | 0.4410 | 0.5522 | 2.099 | 1.113 |
cat("<br>")
# Item descriptives (rownames copied into a column).
# FIX: full_width = FALSE instead of F.
as.data.frame(psych::describe(dt4newVars %>% select(affEnerg, affExc, affInsp))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "High Arousal Positive Affect: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| affEnerg | 12318 | 2.378 | 1.077 | 2 | 2.314 | 1.483 | 1 | 5 | 4 | 0.3196 | -0.7006 | 0.0097 |
| affExc | 12315 | 2.099 | 1.113 | 2 | 1.969 | 1.483 | 1 | 5 | 4 | 0.6913 | -0.5068 | 0.0100 |
| affInsp | 12327 | 2.308 | 1.142 | 2 | 2.217 | 1.483 | 1 | 5 | 4 | 0.4658 | -0.7418 | 0.0103 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(affEnerg, affExc, affInsp)) # item correlation panel
cat("<br>")
# Scale mean score (all three items keyed positively, response range 1-5)
dt4newVars$affHighPos.m <- scoreItems(keys=c(1,1,1), items = dt4newVars %>% select(affEnerg, affExc, affInsp), min = 1, max = 5)$scores
# Scale descriptives (skew/kurtosis suppressed).
# FIX: skew = FALSE / full_width = FALSE instead of F.
as.data.frame(psych::describe(dt4newVars$affHighPos.m, skew = FALSE)) %>%
mutate(vars = "High Arousal Positive Affect") %>%
kable(., caption = "High Arousal Positive Affect: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| High Arousal Positive Affect | 14401 | 2.224 | 0.8098 | 1 | 5 | 4 | 0.0067 |
# Centered (.c), standardized (.z), and factor-score (.fa) versions.
# FIX: TRUE/FALSE instead of the reassignable shorthands T/F.
dt4newVars$affHighPos.c <- scale(dt4newVars$affHighPos.m, scale = FALSE, center = TRUE) # mean-centered
dt4newVars$affHighPos.z <- scale(dt4newVars$affHighPos.m, scale = TRUE) # z-standardized
dt4newVars$affHighPos.fa <- fa(dt4newVars %>% select(affEnerg, affExc, affInsp))$scores # single-factor scores
# Loneliness
# Item analysis (Scale package) on all lone* items: reliability and loadings
ia.lone<- dt4newVars %>%
dplyr::select(starts_with("lone")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.lone$rely # print reliability summary
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.8 .
# Emit the HTML note describing the factor analysis (knitr output glue)
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
# Item-analysis report: item-scale correlations, loadings, means, SDs.
# FIX: full_width = FALSE instead of the reassignable shorthand F.
as.data.frame(Scale::ReportTable(ia.lone)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| lone01 | 0.6827 | 0.8219 | 2.332 | 1.133 |
| lone02 | 0.6372 | 0.7399 | 2.642 | 1.217 |
| lone03 | 0.6196 | 0.7115 | 1.945 | 1.092 |
cat("<br>")
# Item descriptives (rownames copied into a column).
# FIX: full_width = FALSE instead of F.
as.data.frame(psych::describe(dt4newVars %>% select(starts_with("lone")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Loneliness: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| lone01 | 12219 | 2.332 | 1.133 | 2 | 2.251 | 1.483 | 1 | 5 | 4 | 0.4004 | -0.7876 | 0.0102 |
| lone02 | 12208 | 2.642 | 1.217 | 3 | 2.599 | 1.483 | 1 | 5 | 4 | 0.1507 | -1.0071 | 0.0110 |
| lone03 | 12204 | 1.945 | 1.092 | 2 | 1.774 | 1.483 | 1 | 5 | 4 | 0.9601 | 0.0146 | 0.0099 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(starts_with("lone"))) # item correlation panel
cat("<br>")
# Scale mean score (all three items keyed positively, response range 1-5)
dt4newVars$lone.m <- scoreItems(keys=c(1,1,1), items = dt4newVars %>% select(starts_with("lone")), min = 1, max = 5)$scores
# Scale descriptives (skew/kurtosis suppressed).
# FIX: skew = FALSE / full_width = FALSE instead of F.
as.data.frame(psych::describe(dt4newVars$lone.m, skew = FALSE)) %>%
mutate(vars = "Loneliness") %>%
kable(., caption = "Loneliness: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Loneliness | 14401 | 2.311 | 0.8961 | 1 | 5 | 4 | 0.0075 |
# Derived loneliness variables: mean-centered (.c), z-standardized (.z),
# and single-factor scores (.fa).
dt4newVars$lone.c <- scale(dt4newVars$lone.m, scale = F, center = T)
dt4newVars$lone.z <- scale(dt4newVars$lone.m, scale = T)
dt4newVars$lone.fa <- fa(dt4newVars %>% select(starts_with("lone")))$scores
# Item analysis for the boredom items; the raw bor03 is excluded here
# (its reversed version bor03_R is still in the selection at this point).
ia.bor<- dt4newVars %>%
dplyr::select(starts_with("bor0"), -bor03) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# Print the reliability summary.
ia.bor$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.51 . Item(s) that exhibited low correlation with the rest of the scale were: 3 . Furthermore, deleting item(s) 3 may improve reliability.
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.bor)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| bor02 | 0.4888 | 0.7379 | 0.1007 | 1.878 |
| bor01 | 0.4635 | 0.7304 | 0.4189 | 1.902 |
| bor03_R | 0.0776 | 0.4080 | -0.2824 | 1.681 |
pairs.panels.new(dt4newVars %>% select(starts_with("bor0"), -bor03))
Item dropped:
Item three was not well behaved. It seems to measure something else. We dropped it for now.
# Panel plot for the two retained boredom items (bor03 and its reverse
# bor03_R both dropped after the item analysis above flagged item 3).
pairs.panels.new(dt4newVars %>% select(starts_with("bor0"), -bor03, -bor03_R))
cat("<br>")
# Two-item boredom scale score on the -3..3 response range.
dt4newVars$bor.m <- scoreItems(keys=c(1,1), items = dt4newVars %>% select(starts_with("bor0"), -bor03, -bor03_R), min = -3, max = 3)$scores
as.data.frame(psych::describe(dt4newVars$bor.m, skew=F)) %>%
mutate(vars = "Boredom") %>%
kable(., caption = "Boredom: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Boredom | 14401 | 0.2134 | 1.548 | -3 | 3 | 6 | 0.0129 |
dt4newVars$bor.c <- scale(dt4newVars$bor.m, scale = F, center = T)
dt4newVars$bor.z <- scale(dt4newVars$bor.m, scale = T)
dt4newVars$bor.fa <- fa(dt4newVars %>% select(starts_with("bor0"), -bor03, -bor03_R))$scores
cat(crayon::bold("Offline Isolation"))
Offline Isolation
# Item analysis for the offline (in-person) isolation items.
ia.isoPers <- dt4newVars %>%
dplyr::select(ends_with("inPerson")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# Print the reliability summary.
ia.isoPers$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.58 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.isoPers)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| isoOthPpl_inPerson | 0.4618 | 0.7605 | 1.955 | 2.205 |
| isoImmi_inPerson | 0.3782 | 0.5165 | 0.501 | 1.419 |
| isoFriends_inPerson | 0.3316 | 0.4334 | 2.067 | 2.482 |
cat("<br>")
as.data.frame(psych::describe(dt4newVars %>% select(ends_with("inPerson")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Isolation offline: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| isoFriends_inPerson | 12158 | 2.067 | 2.482 | 1 | 1.708 | 1.483 | 0 | 7 | 7 | 0.9742 | -0.4959 | 0.0225 |
| isoOthPpl_inPerson | 12105 | 1.955 | 2.205 | 1 | 1.597 | 1.483 | 0 | 7 | 7 | 1.0423 | -0.0553 | 0.0200 |
| isoImmi_inPerson | 12033 | 0.501 | 1.419 | 0 | 0.087 | 0.000 | 0 | 7 | 7 | 3.2382 | 10.0072 | 0.0129 |
cat("<br>")
# Panel plot for the offline isolation items.
pairs.panels.new(dt4newVars %>% select(ends_with("inPerson")))
cat("<br>")
# Offline-isolation scale score on the 0..7 response range.
dt4newVars$isoPers.m <- scoreItems(keys=c(1,1,1), items = dt4newVars %>% select(ends_with("inPerson")), min = 0, max = 7)$scores
as.data.frame(psych::describe(dt4newVars$isoPers.m, skew=F)) %>%
mutate(vars = "Isolation offline") %>%
kable(., caption = "Isolation offline: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Isolation offline | 14401 | 1.374 | 1.439 | 0 | 7 | 7 | 0.012 |
dt4newVars$isoPers.c <- scale(dt4newVars$isoPers.m, scale = F, center = T)
dt4newVars$isoPers.z <- scale(dt4newVars$isoPers.m, scale = T)
dt4newVars$isoPers.fa <- fa(dt4newVars %>% select(ends_with("inPerson")))$scores
cat(crayon::bold("Online Isolation"))
Online Isolation
ia.isoOnl <- dt4newVars %>%
dplyr::select(ends_with("online")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.isoOnl$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.54 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.isoOnl)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| isoOthPpl_online | 0.4658 | 0.9355 | 2.9763 | 2.730 |
| isoFriends_online | 0.3011 | 0.3776 | 4.8793 | 2.347 |
| isoImmi_online | 0.2990 | 0.3746 | 0.8074 | 1.885 |
cat("<br>")
# Item-level descriptives for the ONLINE isolation items.
# BUG FIX: this block previously selected ends_with("inPerson") — a copy-paste
# from the offline section — so the table captioned "Isolation online"
# actually re-printed the offline item statistics (visible in the rendered
# table, which lists *_inPerson items). Select the *_online items instead.
as.data.frame(psych::describe(dt4newVars %>% select(ends_with("online")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Isolation online: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| isoFriends_inPerson | 12158 | 2.067 | 2.482 | 1 | 1.708 | 1.483 | 0 | 7 | 7 | 0.9742 | -0.4959 | 0.0225 |
| isoOthPpl_inPerson | 12105 | 1.955 | 2.205 | 1 | 1.597 | 1.483 | 0 | 7 | 7 | 1.0423 | -0.0553 | 0.0200 |
| isoImmi_inPerson | 12033 | 0.501 | 1.419 | 0 | 0.087 | 0.000 | 0 | 7 | 7 | 3.2382 | 10.0072 | 0.0129 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(ends_with("online")))
cat("<br>")
# Online-isolation scale score on the 0..7 response range.
dt4newVars$isoOnl.m <- scoreItems(keys=c(1,1,1), items = dt4newVars %>% select(ends_with("online")), min = 0, max = 7)$scores
# BUG FIX: the descriptives previously summarized isoPers.m (the OFFLINE
# scale) under the "Isolation online" caption — the rendered table showed
# values identical to the offline scale. Describe the just-created isoOnl.m.
as.data.frame(psych::describe(dt4newVars$isoOnl.m, skew=F)) %>%
mutate(vars = "Isolation online") %>%
kable(., caption = "Isolation online: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Isolation online | 14401 | 1.374 | 1.439 | 0 | 7 | 7 | 0.012 |
dt4newVars$isoOnl.c <- scale(dt4newVars$isoOnl.m, scale = F, center = T)
dt4newVars$isoOnl.z <- scale(dt4newVars$isoOnl.m, scale = T)
dt4newVars$isoOnl.fa <- fa(dt4newVars %>% select(ends_with("online")))$scores
# Leave House
as.data.frame(psych::describe(dt4newVars$houseLeave, skew=F)) %>%
mutate(vars = "Leaving House") %>%
kable(., caption = "Leaving House: Item Descriptive", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Leaving House | 11876 | 2.438 | 1.07 | 1 | 4 | 3 | 0.0098 |
No responses yet
# extC19Msg
ia.ext <- dt4newVars %>%
dplyr::select(starts_with("extC19"), -extC19Msg) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.ext$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.77 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.ext)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| extC19Rules | 0.7054 | 0.9270 | 4.050 | 1.456 |
| extC19Org | 0.5827 | 0.6724 | 3.743 | 1.432 |
| extC19Punish | 0.5358 | 0.6037 | 3.140 | 1.669 |
cat("<br>")
as.data.frame(psych::describe(dt4newVars %>% select(starts_with("extC19"), -extC19Msg))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Community response: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| extC19Rules | 11967 | 4.050 | 1.456 | 4 | 4.138 | 1.483 | 1 | 6 | 5 | -0.4138 | -0.7261 | 0.0133 |
| extC19Punish | 11962 | 3.140 | 1.669 | 3 | 3.051 | 1.483 | 1 | 6 | 5 | 0.2305 | -1.1750 | 0.0153 |
| extC19Org | 11967 | 3.743 | 1.432 | 4 | 3.774 | 1.483 | 1 | 6 | 5 | -0.1904 | -0.7870 | 0.0131 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(starts_with("extC19"), -extC19Msg))
cat("<br>")
dt4newVars$ext.m <- scoreItems(keys=c(1,1,1), items = dt4newVars %>% select(starts_with("extC19"), -extC19Msg),
min = 1, max = 6)$scores
as.data.frame(psych::describe(dt4newVars$ext.m, skew=F)) %>%
mutate(vars = "Community response") %>%
kable(., caption = "Community response: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Community response | 14401 | 3.648 | 1.147 | 1 | 6 | 5 | 0.0096 |
dt4newVars$ext.c <- scale(dt4newVars$ext.m, scale = F, center = T)
dt4newVars$ext.z <- scale(dt4newVars$ext.m, scale = T)
dt4newVars$ext.fa <- fa(dt4newVars %>% select(starts_with("extC19"), -extC19Msg))$scores
# Item analysis for the behavioral-response (c19perBeh*) items.
ia.beh <- dt4newVars %>%
dplyr::select(starts_with("c19per")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# Print the reliability summary.
ia.beh$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.68 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.beh)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| c19perBeh02 | 0.6036 | 0.9016 | 2.622 | 0.8603 |
| c19perBeh03 | 0.4663 | 0.5644 | 2.073 | 1.3758 |
| c19perBeh01 | 0.4304 | 0.5100 | 2.468 | 0.9603 |
cat("<br>")
as.data.frame(psych::describe(dt4newVars %>% select(starts_with("c19per")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Behavioral response: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| c19perBeh01 | 12009 | 2.468 | 0.9603 | 3 | 2.684 | 0 | -3 | 3 | 6 | -2.879 | 10.787 | 0.0088 |
| c19perBeh02 | 12012 | 2.622 | 0.8603 | 3 | 2.816 | 0 | -3 | 3 | 6 | -3.577 | 16.371 | 0.0078 |
| c19perBeh03 | 12013 | 2.073 | 1.3758 | 3 | 2.375 | 0 | -3 | 3 | 6 | -1.862 | 3.236 | 0.0126 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(starts_with("c19per")))
cat("<br>")
dt4newVars$beh.m <- scoreItems(keys=c(1,1,1), items = dt4newVars %>% select(starts_with("c19per")),
min = -3, max = 3)$scores
as.data.frame(psych::describe(dt4newVars$beh.m, skew=F)) %>%
mutate(vars = "Behavioral response") %>%
kable(., caption = "Behavioral response: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Behavioral response | 14401 | 2.489 | 0.8045 | -3 | 3 | 6 | 0.0067 |
dt4newVars$beh.c <- scale(dt4newVars$beh.m, scale = F, center = T)
dt4newVars$beh.z <- scale(dt4newVars$beh.m, scale = T)
dt4newVars$beh.fa <- fa(dt4newVars %>% select(starts_with("c19per")))$scores
as.data.frame(psych::describe(dt4newVars$c19Hope)) %>%
mutate(vars = "Hope") %>%
kable(., caption = "Hope: Item Descriptive", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Hope | 12039 | 1.038 | 1.622 | 1 | 1.211 | 1.483 | -3 | 3 | 6 | -0.7986 | -0.2141 | 0.0148 |
# Histogram of the single-item Corona virus hope measure (c19Hope):
# one bar per scale point, project publication theme.
ggplot(dt4newVars, aes(x = c19Hope)) +
  geom_histogram(binwidth = 1, alpha = 0.5) +
  # geom_density(alpha = 0.6) +   # disabled density overlay, kept for reference
  labs(
    title = "Hope distribution",
    x = "Corona Virus Hope",
    y = "Frequency"
  ) +
  theme_Publication()
as.data.frame(psych::describe(dt4newVars$c19Eff)) %>%
mutate(vars = "Efficacy") %>%
kable(., caption = "Efficacy: Item Descriptive", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Efficacy | 12035 | 0.7914 | 1.658 | 1 | 0.9204 | 1.483 | -3 | 3 | 6 | -0.6522 | -0.4741 | 0.0151 |
ggplot(dt4newVars, aes(x = c19Eff)) +
geom_histogram(binwidth=1, alpha=0.5) +
#geom_density(alpha=0.6)+
labs(title="Efficacy distribution",x="Corona Virus Efficacy", y = "Frequency") +
theme_Publication()
ia.para <- dt4newVars %>%
dplyr::select(starts_with("para")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.para$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.71 . Furthermore, deleting item(s) 1 may improve reliability.
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.para)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| para02 | 0.6332 | 0.8824 | 2.381 | 2.600 |
| para03 | 0.5701 | 0.7143 | 2.325 | 2.623 |
| para01 | 0.4008 | 0.4533 | 5.282 | 3.014 |
cat("<br>")
as.data.frame(psych::describe(dt4newVars %>% select(starts_with("para")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "State Paranoia: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| para01 | 11712 | 5.282 | 3.014 | 6 | 5.359 | 2.965 | 0 | 10 | 10 | -0.2445 | -0.9668 | 0.0278 |
| para02 | 11708 | 2.381 | 2.600 | 2 | 1.988 | 2.965 | 0 | 10 | 10 | 1.0278 | 0.2353 | 0.0240 |
| para03 | 11707 | 2.325 | 2.623 | 1 | 1.913 | 1.483 | 0 | 10 | 10 | 1.0573 | 0.2038 | 0.0242 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(starts_with("para")))
cat("<br>")
dt4newVars$para.m <- scoreItems(keys=c(1,1,1), items = dt4newVars %>% select(starts_with("para")),
min = 0, max = 10)$scores
as.data.frame(psych::describe(dt4newVars$para.m, skew=F)) %>%
mutate(vars = "State Paranoia") %>%
kable(., caption = "State Paranoia: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| State Paranoia | 14401 | 3.268 | 1.976 | 0 | 10 | 10 | 0.0165 |
dt4newVars$para.c <- scale(dt4newVars$para.m, scale = F, center = T)
dt4newVars$para.z <- scale(dt4newVars$para.m, scale = T)
dt4newVars$para.fa <- fa(dt4newVars %>% select(starts_with("para")))$scores
ia.consp <- dt4newVars %>%
dplyr::select(starts_with("consp")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.consp$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.69 . Furthermore, deleting item(s) 3 may improve reliability.
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.consp)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| consp02 | 0.5997 | 0.8273 | 7.200 | 2.479 |
| consp01 | 0.5749 | 0.7564 | 6.888 | 2.616 |
| consp03 | 0.3693 | 0.4205 | 5.325 | 2.735 |
cat("<br>")
as.data.frame(psych::describe(dt4newVars %>% select(starts_with("consp")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Conspiracy Theory: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| consp01 | 11705 | 6.888 | 2.616 | 7 | 7.168 | 2.965 | 0 | 10 | 10 | -0.6867 | -0.2185 | 0.0242 |
| consp02 | 11694 | 7.200 | 2.479 | 8 | 7.504 | 2.965 | 0 | 10 | 10 | -0.8578 | 0.1660 | 0.0229 |
| consp03 | 11696 | 5.325 | 2.735 | 5 | 5.371 | 2.965 | 0 | 10 | 10 | -0.1192 | -0.7538 | 0.0253 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(starts_with("consp")))
cat("<br>")
dt4newVars$consp.m <- scoreItems(keys=c(1,1,1), items = dt4newVars %>% select(starts_with("consp")),
min = 0, max = 10)$scores
as.data.frame(psych::describe(dt4newVars$consp.m, skew=F)) %>%
mutate(vars = "Conspiracy Theory") %>%
kable(., caption = "Conspiracy Theory: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Conspiracy Theory | 14401 | 6.508 | 1.859 | 0 | 10 | 10 | 0.0155 |
# Derived conspiracy-scale variables: mean-centered, z-standardized, and
# single-factor scores.
# BUG FIX: these assignments previously targeted para.c / para.z / para.fa,
# silently OVERWRITING the State Paranoia variables created just above with
# conspiracy-scale values. They must create consp.* variables instead.
dt4newVars$consp.c <- scale(dt4newVars$consp.m, scale = F, center = T)
dt4newVars$consp.z <- scale(dt4newVars$consp.m, scale = T)
dt4newVars$consp.fa <- fa(dt4newVars %>% select(starts_with("consp")))$scores
# Item analysis for job insecurity. Raw jbInsec02 and jbInsec04 are dropped
# (the reversed jbInsec02_R remains), and the -99 "not applicable" code is
# recoded to NA before analysis.
ia.jobinsec<- dt4newVars %>%
dplyr::select(starts_with("jbInsec"), -jbInsec02, -jbInsec04) %>%
na_if(., -99) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# Print the reliability summary.
ia.jobinsec$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.85 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.jobinsec)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| jbInsec01 | 0.7612 | 0.8769 | -0.7671 | 1.178 |
| jbInsec03 | 0.7168 | 0.8014 | -0.0667 | 1.381 |
| jbInsec02_R | 0.6848 | 0.7530 | -0.7225 | 1.163 |
cat("<br>")
as.data.frame(psych::describe(dt4newVars %>% select(starts_with("jbInsec"), -jbInsec02, -jbInsec04) %>% na_if(., -99))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Job insecurity: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| jbInsec01 | 8991 | -0.7671 | 1.178 | -1 | -0.9101 | 1.483 | -2 | 2 | 4 | 0.7677 | -0.2545 | 0.0124 |
| jbInsec03 | 9773 | -0.0667 | 1.381 | 0 | -0.0834 | 1.483 | -2 | 2 | 4 | 0.0212 | -1.3020 | 0.0140 |
| jbInsec02_R | 9277 | -0.7225 | 1.163 | -1 | -0.8482 | 1.483 | -2 | 2 | 4 | 0.6778 | -0.3598 | 0.0121 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(starts_with("jbInsec"), -jbInsec02, -jbInsec04) %>% na_if(., -99))
cat("<br>")
dt4newVars$jobinsec.m <- scoreItems(keys=c(1,1,1),
items = dt4newVars %>% select(starts_with("jbInsec"), -jbInsec02, -jbInsec04) %>% na_if(., -99),
min = -2, max = 2)$scores
as.data.frame(psych::describe(dt4newVars$jobinsec.m, skew=F)) %>%
mutate(vars = "Job insecurity") %>%
kable(., caption = "Job insecurity: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Job insecurity | 14401 | -0.5737 | 0.8535 | -2 | 2 | 4 | 0.0071 |
# Derived job-insecurity variables: mean-centered, z-standardized, and
# single-factor scores.
dt4newVars$jobinsec.c <- scale(dt4newVars$jobinsec.m, scale = F, center = T)
dt4newVars$jobinsec.z <- scale(dt4newVars$jobinsec.m, scale = T)
# BUG FIX: recode the -99 "not applicable" codes to NA before factor scoring,
# matching every other jbInsec computation in this section; previously -99
# entered the factor analysis as a valid response and distorted the scores.
dt4newVars$jobinsec.fa <- fa(dt4newVars %>% select(starts_with("jbInsec"), -jbInsec02, -jbInsec04) %>% na_if(., -99))$scores
ia.pfs<- dt4newVars %>%
dplyr::select(starts_with("PFS0")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.pfs$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.85 . Furthermore, deleting item(s) 2 may improve reliability.
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.pfs)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| PFS01 | 0.7994 | 0.9361 | -0.2150 | 1.250 |
| PFS03 | 0.7597 | 0.8602 | -0.4728 | 1.249 |
| PFS02 | 0.6109 | 0.6462 | 0.4447 | 1.212 |
cat("<br>")
as.data.frame(psych::describe(dt4newVars %>% select(starts_with("PFS0")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Financial Strain: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| PFS01 | 12185 | -0.2150 | 1.250 | 0 | -0.2687 | 1.483 | -2 | 2 | 4 | 0.184 | -1.0404 | 0.0113 |
| PFS02 | 12193 | 0.4447 | 1.212 | 1 | 0.5441 | 1.483 | -2 | 2 | 4 | -0.581 | -0.6659 | 0.0110 |
| PFS03 | 12194 | -0.4728 | 1.249 | -1 | -0.5654 | 1.483 | -2 | 2 | 4 | 0.449 | -0.8835 | 0.0113 |
cat("<br>")
pairs.panels.new(dt4newVars %>% select(starts_with("PFS0")))
cat("<br>")
dt4newVars$pfs.m <- scoreItems(keys=c(1,1,1),
items = dt4newVars %>% select(starts_with("PFS0")),
min = -2, max = 2)$scores
# Scale-level descriptives for the financial-strain mean score.
# BUG FIX: the display label and caption previously read "inancial Strain"
# (leading "F" dropped), which propagated into the rendered table.
as.data.frame(psych::describe(dt4newVars$pfs.m, skew=F)) %>%
mutate(vars = "Financial Strain") %>%
kable(., caption = "Financial Strain: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Financial Strain | 14401 | -0.0686 | 1 | -2 | 2 | 4 | 0.0083 |
# Derived financial-strain variables: mean-centered, z-standardized, and
# single-factor scores.
dt4newVars$pfs.c <- scale(dt4newVars$pfs.m, scale = F, center = T)
dt4newVars$pfs.z <- scale(dt4newVars$pfs.m, scale = T)
dt4newVars$pfs.fa <- fa(dt4newVars %>% select(starts_with("PFS0")))$scores
# clean-up Item Analyses
# BUG FIX: anchor the pattern. ls(pattern = "ia") matches ANY object whose
# name merely contains "ia" anywhere, so unrelated objects could be deleted.
# "^ia\\." removes exactly the ia.* item-analysis objects created above.
rm(list = ls(pattern = "^ia\\."))
# remove directly identifiable data (with and without page timers)
# Drop Qualtrics metadata columns that could directly identify respondents
# (IP, names, email, geolocation, free-text consent field).
dt5ReducedTimer <- dt4newVars %>%
select(-c(IPAddress,
RecipientLastName,
RecipientFirstName,
RecipientEmail,
ExternalReference,
LocationLatitude,
LocationLongitude,
DistributionChannel,
ICRec_1_TEXT))
# Same frame without the per-page timer (t_*) columns.
dt5Reduced <- dt5ReducedTimer %>%
select(-starts_with("t_"))
# remove filtered cases (with and without page timers)
# Keep only cases that passed all three quality filters (not a survey
# preview, not too fast, not a straightliner), then drop the filter flags.
dt5ReducedTimerCases <- dt5ReducedTimer %>%
filter(FilterPreview == 0,
FilterTime == 0,
FilterStraightliner == 0) %>%
select(-starts_with("Filter"))
# Same filtering applied to the timer-free frame.
dt5ReducedCases <- dt5Reduced %>%
filter(FilterPreview == 0,
FilterTime == 0,
FilterStraightliner == 0) %>%
select(-starts_with("Filter"))
Export the main dataframe as RData and SPSS sav files. We export versions with and without page timers.
# Build timestamped output paths (with/without page timers, .sav and .RData).
namSPSS <- paste0("data/cleaned data/Psycorona Baseline cleaned ", format(Sys.time(), format = "%F %H-%M %Z"),".sav")
namR <- paste0("data/cleaned data/Psycorona Baseline cleaned ", format(Sys.time(), format = "%F %H-%M %Z"),".RData")
namTSPSS <- paste0("data/cleaned data/Psycorona Baseline cleaned with page timer ", format(Sys.time(), format = "%F %H-%M %Z"),".sav")
namTR <- paste0("data/cleaned data/Psycorona Baseline cleaned with page timer ", format(Sys.time(), format = "%F %H-%M %Z"),".RData")
# Write the SPSS and RData exports, then drop the path variables.
write_sav(dt5Reduced, namSPSS)
write_sav(dt5ReducedTimer, namTSPSS)
save(dt5Reduced, file = namR)
save(dt5ReducedTimer, file = namTR)
rm(list=ls(pattern="nam"))
# export for Shiny
# Filtered, timer-free data set consumed by the PsyCorona web app.
saveRDS(dt5ReducedCases, file = "../PsyCorona-WebApp/data/reducedData.rds")